/*
 * Restore per-vCPU MSR state on context switch into an SVM guest.
 * Intentionally a no-op in this build: the body is empty, so no MSRs are
 * touched here.  NOTE(review): presumably the 64-bit (__x86_64__) variant
 * elsewhere does real work and this stub serves the 32-bit configuration
 * where no long-mode MSRs need restoring — confirm against the surrounding
 * #ifdef structure, which is garbled in this patch fragment.
 */
void svm_restore_msrs(struct vcpu *v)
{
}
+#endif
#define IS_CANO_ADDRESS(add) 1
return 0;
}
+#ifdef __x86_64__
HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n",
msr_content);
+#else
+ HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %llx\n",
+ msr_content);
+#endif
regs->eax = msr_content & 0xffffffff;
regs->edx = msr_content >> 32;
struct vcpu *vc = current;
struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
+#ifdef __x86_64__
HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx msr_content %lx\n",
regs->ecx, msr_content);
+#else
+ HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %x msr_content %llx\n",
+ regs->ecx, msr_content);
+#endif
switch (regs->ecx)
{
case MSR_EFER:
+#ifdef __x86_64__
if ((msr_content & EFER_LME) ^ test_bit(SVM_CPU_STATE_LME_ENABLED,
&vc->arch.hvm_svm.cpu_state))
{
if ((msr_content ^ vmcb->efer) & EFER_LME)
msr_content &= ~EFER_LME;
/* No update for LME/LMA since it have no effect */
+#endif
vmcb->efer = msr_content | EFER_SVME;
break;
return 1;
}
-#else
-static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
-{
- return 0;
-}
-
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
-{
- return 0;
-}
-#endif
-
void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
if (input == 1)
{
-#ifndef __x86_64__
if ( hvm_apic_support(v->domain) &&
!vlapic_global_enabled((VLAPIC(v))) )
-#endif
clear_bit(X86_FEATURE_APIC, &edx);
#if CONFIG_PAGING_LEVELS < 3